In [1]:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
from datetime import datetime

In [2]:
'''Useful functions'''
def sigmoid(z):
    # logistic activation function
    return 1 / (1 + np.exp(-z))

def sigmoidPrime(z):
    # derivative of the sigmoid with respect to its argument
    return np.exp(-z) / ((1 + np.exp(-z))**2)
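
A quick sanity check (not in the original notebook): sigmoidPrime should agree with a centered finite difference of sigmoid.

In [ ]:
eps = 1e-6
z = np.linspace(-5, 5, 11)
numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
print(np.max(np.abs(numeric - sigmoidPrime(z))))  # ~1e-10 or smaller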

In [3]:
from numpy.linalg import norm

'''Neural Network with an arbitrary number of hidden layers'''

class Neural_Network(object):
    def __init__(self, inputLayerSize, outputLayerSize, hiddenLayerSize, Lambda):
        #Define Hyperparameters:
        self.inputLayerSize = inputLayerSize 
        self.outputLayerSize = outputLayerSize
        
        # hidden layers 
        self.nbhiddenLayers = len(hiddenLayerSize)
        self.hiddenLayerSize = hiddenLayerSize
        
        self.Lambda = Lambda
        
        # weight matrices, randomly initialized;
        # shapes must match the activations flowing through forward()
        self.W = []
        self.W.append(np.random.rand(self.inputLayerSize, self.hiddenLayerSize[0]))
        for i in range(self.nbhiddenLayers - 1):
            self.W.append(np.random.rand(self.hiddenLayerSize[i], self.hiddenLayerSize[i+1]))
        self.W.append(np.random.rand(self.hiddenLayerSize[-1], self.outputLayerSize))


        # biases: one row vector per layer after the input layer
        # (they are not part of getParams/setParams, so they stay fixed during training)
        self.biais = []
        for i in range(self.nbhiddenLayers):
            self.biais.append(np.random.randn(1, self.hiddenLayerSize[i]))
        self.biais.append(np.random.randn(1, self.outputLayerSize))
            
            
    def forward(self, X):
        # propagate inputs through each layer, caching pre-activations and activations
        self.z = []
        self.a = []
        X_copy = X
        for i in range(self.nbhiddenLayers+1):
            
            z = np.dot(X_copy, self.W[i]) +self.biais[i]
            a = sigmoid(z)
      
            self.z.append(z)
            self.a.append(a)
            
            X_copy = a
            
        return self.a[-1]
        
    def costFunction(self, X, y):
        #Compute cost for given X,y, use weights already stored in class.
        self.yHat = self.forward(X)
        J = 0.5*np.sum((y-self.yHat)**2)/X.shape[0]
        # regularization (disabled): + (self.Lambda/2)*sum(norm(W)**2 for W in self.W)
        return J     
        
    def costFunctionPrime(self, X, y):
        #Compute derivatives with respect to each weight matrix for a given X and y:
        self.yHat = self.forward(X)
        
        self.delta = []
        self.dJdW = []
        self.delta.insert(0, np.multiply(-(y-self.yHat), sigmoidPrime(self.z[-1])))
        #Add gradient of regularization term:
        self.dJdW.insert(0,np.dot(self.a[-2].T, self.delta[0])/X.shape[0] )
                    # + self.Lambda*self.W[-1] for regularization
        
        for i in range(self.nbhiddenLayers-1,0,-1):            
            self.delta.insert(0, np.dot(self.delta[0], self.W[i+1].T)*sigmoidPrime(self.z[i]))
            #Add gradient of regularization term:
            self.dJdW.insert( 0, np.dot(self.a[i-1].T, self.delta[0])/X.shape[0] )
                             # + self.Lambda*self.W[i] for regularization
        
        self.delta.insert(0, np.dot(self.delta[0], self.W[1].T)*sigmoidPrime(self.z[0]))
        #Add gradient of regularization term:
        self.dJdW.insert( 0, np.dot(X.T, self.delta[0])/X.shape[0] )
        
    def getParams(self):
        #get W1, W2, ..., Wi into a flat vector (the biases are not included)
        params = np.concatenate([array.ravel() for array in self.W])
        return params
    
    def setParams(self, params):
        #unrolling of Wi... vectors
        W_start = 0
        W_end = self.hiddenLayerSize[0]*self.inputLayerSize
        self.W = []
        self.W.append(np.reshape(params[W_start: W_end], \
                            (self.inputLayerSize,self.hiddenLayerSize[0])))
        for i in range(self.nbhiddenLayers -1):
            W_start = W_end
            W_end = W_end + self.hiddenLayerSize[i]* self.hiddenLayerSize[i+1]
            self.W.append(np.reshape(params[W_start: W_end], \
                            (self.hiddenLayerSize[i],self.hiddenLayerSize[i+1])))
                    
        W_start = W_end
        W_end = W_end + self.hiddenLayerSize[self.nbhiddenLayers -1]* self.outputLayerSize
        self.W.append( np.reshape(params[W_start: W_end], \
                            (self.hiddenLayerSize[self.nbhiddenLayers -1],self.outputLayerSize)) )
    
    def computeGradients(self, X, y):
        # get gradients into a flat vector
        self.costFunctionPrime(X, y)
        return np.concatenate([array.ravel() for array in self.dJdW])
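
As a sanity check (not in the original notebook), the analytic gradients from computeGradients can be compared against central finite differences of costFunction. The computeNumericalGradient helper below is illustrative, not from the original:

In [ ]:
def computeNumericalGradient(N, X, y, eps=1e-4):
    # perturb each flattened weight in turn and take a centered difference
    params0 = N.getParams()
    numgrad = np.zeros(params0.shape)
    perturb = np.zeros(params0.shape)
    for p in range(len(params0)):
        perturb[p] = eps
        N.setParams(params0 + perturb)
        loss2 = N.costFunction(X, y)
        N.setParams(params0 - perturb)
        loss1 = N.costFunction(X, y)
        numgrad[p] = (loss2 - loss1) / (2 * eps)
        perturb[p] = 0
    N.setParams(params0)  # restore the original weights
    return numgrad

# the relative error norm(grad - numgrad) / norm(grad + numgrad)
# should be around 1e-8 if backpropagation is correct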

In [6]:
from scipy import optimize
'''Trainer for the Neural Networks'''
class Trainer(object):
    def __init__(self, N):
        self.N = N
    
    def callbackF(self, params):
        # callback function keeping Cost in memory
        self.N.setParams(params)
        self.J.append(self.N.costFunction(self.X, self.y))  
    
    def costFunctionWrapper(self, params, X, y):
        # function returning cost and grad
        self.N.setParams(params)
        cost = self.N.costFunction(X,y)
        grad = self.N.computeGradients(X,y)
        return cost, grad
    
    def train(self, X, y):
        
        self.X= X
        self.y= y
        
        # make empty list to store costs
        self.J = []
        params0 = self.N.getParams()
        
        # algorithm used here is BFGS
        options = {'maxiter': 200000, 'disp': True}

        # jac=True: costFunctionWrapper returns both the cost and its gradient
        _res = optimize.minimize(self.costFunctionWrapper, params0, \
                                jac = True, method ='BFGS', args = (X,y), \
                                options = options, callback = self.callbackF)
        self.N.setParams(_res.x)
        self.optimizationResults = _res

In [7]:
'''Test of Neural Net'''
n_samples = 1000
inputLayerSize = 5
outputLayerSize = 1
hiddenLayerSize = [1, 4, 5]
Lambda = 0.0001
NN = Neural_Network(inputLayerSize = inputLayerSize, outputLayerSize = outputLayerSize, hiddenLayerSize = hiddenLayerSize, Lambda = Lambda)
T = Trainer(NN)
X = np.random.rand(n_samples,inputLayerSize)
y = np.random.rand(n_samples,outputLayerSize)
T.train(X, y)
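
With purely random inputs and targets there is nothing to learn, so the cost should plateau near that of the best constant predictor: for y ~ U(0,1) the variance is 1/12, giving J = 0.5 * 1/12, around 0.04. This cell is only a shape-and-plumbing test.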

Prediction for Electricity production


In [ ]:
'''Data loading'''
f_names_national = [
    '2012 Conso Prod.csv',
    '2013 Conso Prod.csv',
    '2014 Conso Prod.csv',
    '2015 Conso Prod.csv'
]

pd.set_option('max_columns', 100)

datas = []
for f_name in f_names_national:
    data = pd.read_csv('data/' + f_name, delimiter='\t', encoding = "ISO-8859-1")
    # drop rows with missing consumption or date
    data = data[data.Consommation.notnull()]
    data = data[data.Date.notnull()]
    # build a datetime column from the Date and Heures fields
    data['timestamp'] = [str(d) + ' ' + str(t) for d, t in zip(data['Date'].values, data['Heures'].values)]
    data['timestamp'] = pd.to_datetime(data['timestamp'], format='%Y-%m-%d %H:%M')
    datas.append(data)

data_final = pd.concat(datas).reset_index()

In [ ]:
'''Plot of the electricity consumption in France over four years (2012-2015)'''
ts = pd.Series(data_final['Consommation'].values, index = data_final['timestamp'].values)
fig, ax = plt.subplots(figsize=(20,5))
ax.plot(ts[::10].index, ts[::10])
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
ax.set_title("Electricity consumption in France (MW)")

In [ ]:
# Sorting values: the first element of the data set is the most recent:
# y(t), y(t-1), etc.
data_final = data_final.sort_values(by=['Date','Heures'], ascending=[False,False])

In [ ]:
'''tau: periodicity parameter'''
tau = 48 # 48: we consider a 24h correlation. We could take tau = 1 year
         # to correlate the data with last year's values.

def data_labels(dataframe=data_final, field='Consommation', tau = tau):
    X = dataframe[field].values
    # stack the series with its tau shifted copies; np.roll wraps around,
    # so the first tau rows mix values from both ends of the series
    X_ = np.stack([np.roll(X,i) for i in range(tau + 1)], axis=1)

    labels = X_[:,:1]
    data = X_[:,1:]
    return data, labels
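
A toy illustration (not in the original notebook) of what this construction produces; it also shows the wrap-around artifact noted in the comment above:

In [ ]:
# Illustrative only: lag matrix for a 6-point series with tau = 2
x = np.arange(6)
print(np.stack([np.roll(x, i) for i in range(3)], axis=1))
# [[0 5 4]   <- the first tau rows wrap around the end of the
#  [1 0 5]      series, so in practice they could be dropped
#  [2 1 0]
#  [3 2 1]
#  [4 3 2]
#  [5 4 3]]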

Generating Training and CV sets


In [ ]:
# Creating the training set and the cross-validation set:
# three years of training (2012-2014), one year for cross-validation (2015)
X_train, Y_train = data_labels(dataframe = data_final[data_final['Date'] <= '2014-12-31'])
X_test, Y_test = data_labels(dataframe = data_final[data_final['Date'] > '2014-12-31'])
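
One caveat the original cells do not handle: the network output goes through a sigmoid, so predictions are bounded in (0, 1), while Consommation is in the tens of thousands of MW. A minimal scaling sketch (the y_max name is an assumption, not from the original):

In [ ]:
# Scale features and labels into (0, 1): a sigmoid output can never
# exceed 1, so raw MW values cannot be fit without rescaling
y_max = Y_train.max()
X_train, X_test = X_train / y_max, X_test / y_max
Y_train, Y_test = Y_train / y_max, Y_test / y_max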

Test on Train Data


In [35]:
Lambda = 0.01
NN = Neural_Network(inputLayerSize = X_train.shape[1], outputLayerSize = 1, hiddenLayerSize = [20,20,10], Lambda = Lambda)
T = Trainer(NN)

In [36]:
T.train(X_train, Y_train)
plt.figure(figsize=(20,5))
plt.plot(T.J)
plt.grid(True)
plt.xlabel('Iterations')
plt.ylabel('Cost')


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-36-4d2b1b525ea0> in <module>()
----> 1 T.train(X_train, Y_train)
      2 plt.figure(figsize=(20,5))
      3 plt.plot(T.J)
      4 plt.plot(T.J)
      5 plt.grid(1)

<ipython-input-26-493702adff91> in train(self, X, y)
     24         options ={'maxiter' : 200000, 'disp':True}
     25 
---> 26         _res = optimize.minimize(self.costFunctionWrapper, params0,                                 jac = False, method ='BFGS', args = (X,y)                                  , callback=self.callbackF
     27                                 )
     28         self.N.setParams(_res.x)

/home/benlet/anaconda3/lib/python3.5/site-packages/scipy/optimize/_minimize.py in minimize(fun, x0, args, method, jac, hess, hessp, bounds, constraints, tol, callback, options)
    442         return _minimize_cg(fun, x0, args, jac, callback, **options)
    443     elif meth == 'bfgs':
--> 444         return _minimize_bfgs(fun, x0, args, jac, callback, **options)
    445     elif meth == 'newton-cg':
    446         return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback,

/home/benlet/anaconda3/lib/python3.5/site-packages/scipy/optimize/optimize.py in _minimize_bfgs(fun, x0, args, jac, callback, gtol, norm, eps, maxiter, disp, return_all, **unknown_options)
    911     else:
    912         grad_calls, myfprime = wrap_function(fprime, args)
--> 913     gfk = myfprime(x0)
    914     k = 0
    915     N = len(x0)

/home/benlet/anaconda3/lib/python3.5/site-packages/scipy/optimize/optimize.py in function_wrapper(*wrapper_args)
    290     def function_wrapper(*wrapper_args):
    291         ncalls[0] += 1
--> 292         return function(*(wrapper_args + args))
    293 
    294     return ncalls, function_wrapper

/home/benlet/anaconda3/lib/python3.5/site-packages/scipy/optimize/optimize.py in approx_fprime(xk, f, epsilon, *args)
    686 
    687     """
--> 688     return _approx_fprime_helper(xk, f, epsilon, args=args)
    689 
    690 

/home/benlet/anaconda3/lib/python3.5/site-packages/scipy/optimize/optimize.py in _approx_fprime_helper(xk, f, epsilon, args, f0)
    626         ei[k] = 1.0
    627         d = epsilon * ei
--> 628         grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
    629         ei[k] = 0.0
    630     return grad

TypeError: unsupported operand type(s) for -: 'tuple' and 'tuple'
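
The failure above comes from an earlier version of train() that passed jac = False (visible in the traceback): SciPy then treats the (cost, grad) tuple returned by costFunctionWrapper as the bare objective value, and its finite-difference code fails when it tries to subtract two tuples. Passing jac = True, as in the Trainer cell above, tells minimize that the wrapper returns both the cost and its gradient.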

In [15]:
plt.plot(NN.forward(X_test))


Out[15]:
[<matplotlib.lines.Line2D at 0x7f50e00eb9b0>]
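
Plotting the raw predictions alone is hard to judge; a sketch (not in the original notebook) that overlays predictions and the actual test labels on a slice of the test set:

In [ ]:
plt.figure(figsize=(20,5))
plt.plot(Y_test[:500], label='actual')
plt.plot(NN.forward(X_test)[:500], label='predicted')
plt.legend()
plt.ylabel('scaled consumption')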
